// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_0.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */
#ifndef _GLIBCXX_ATOMIC_0_H
#define _GLIBCXX_ATOMIC_0_H 1

#pragma GCC system_header
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

// 0 == __atomic0 == Never lock-free
namespace __atomic0
{
  _GLIBCXX_BEGIN_EXTERN_C

  void
  atomic_flag_clear_explicit(__atomic_flag_base*, memory_order)
  _GLIBCXX_NOTHROW;

  void
  __atomic_flag_wait_explicit(__atomic_flag_base*, memory_order)
  _GLIBCXX_NOTHROW;

  _GLIBCXX_CONST __atomic_flag_base*
  __atomic_flag_for_address(const volatile void* __z) _GLIBCXX_NOTHROW;

  _GLIBCXX_END_EXTERN_C
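
  // How the locking fallback works: each atomic object is guarded by a
  // __atomic_flag_base acting as a spinlock, obtained by hashing the
  // object's address, so unrelated objects may share a lock.  A minimal
  // sketch of such a mapping (hypothetical; the shipped definition lives
  // in the runtime library, not in this header):
  //
  //   static __atomic_flag_base __flag_table[16];
  //
  //   __atomic_flag_base*
  //   __atomic_flag_for_address(const volatile void* __z)
  //   {
  //     __UINTPTR_TYPE__ __u = reinterpret_cast<__UINTPTR_TYPE__>(__z);
  //     return &__flag_table[(__u >> 2) % 16];  // hash address to a flag
  //   }
  //
  // Callers acquire the flag with __atomic_flag_wait_explicit and
  // release it with atomic_flag_clear_explicit, as the macros below do.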
  // Implementation specific defines.
#define _ATOMIC_MEMBER_ _M_i
#define _ATOMIC_LOAD_(__a, __x)                                     \
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type;                   \
    __i_type* __p = &_ATOMIC_MEMBER_;                               \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);       \
    __atomic_flag_wait_explicit(__g, __x);                          \
    __i_type __r = *__p;                                            \
    atomic_flag_clear_explicit(__g, __x);                           \
    __r; })
#define _ATOMIC_STORE_(__a, __n, __x)                               \
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type;                   \
    __i_type* __p = &_ATOMIC_MEMBER_;                               \
    __typeof__(__n) __w = (__n);                                    \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);       \
    __atomic_flag_wait_explicit(__g, __x);                          \
    *__p = __w;                                                     \
    atomic_flag_clear_explicit(__g, __x);                           \
    __w; })
#define _ATOMIC_MODIFY_(__a, __o, __n, __x)                         \
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type;                   \
    __i_type* __p = &_ATOMIC_MEMBER_;                               \
    __typeof__(__n) __w = (__n);                                    \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);       \
    __atomic_flag_wait_explicit(__g, __x);                          \
    __i_type __r = *__p;                                            \
    *__p __o __w;                                                   \
    atomic_flag_clear_explicit(__g, __x);                           \
    __r; })
#define _ATOMIC_CMPEXCHNG_(__a, __e, __n, __x)                      \
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type;                   \
    __i_type* __p = &_ATOMIC_MEMBER_;                               \
    __typeof__(__e) __q = (__e);                                    \
    __typeof__(__n) __w = (__n);                                    \
    bool __r;                                                       \
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);       \
    __atomic_flag_wait_explicit(__g, __x);                          \
    __i_type __t = *__p;                                            \
    if (*__q == __t)                                                \
      {                                                             \
	*__p = (__i_type)__w;                                       \
	__r = true;                                                 \
      }                                                             \
    else { *__q = __t; __r = false; }                               \
    atomic_flag_clear_explicit(__g, __x);                           \
    __r; })
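
  // Each macro above is a GNU statement expression: it spins on the
  // guard flag, performs a plain memory access while the flag is held,
  // clears the flag, and yields its last expression as the result.  For
  // example, fetch_add on an integral atomic (defined further below) is
  //
  //   __int_type
  //   fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
  //   { return _ATOMIC_MODIFY_(this, +=, __i, __m); }
  //
  // where the __o argument splices in the compound assignment, so the
  // locked region executes *__p += __i and the old value __r is returned.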
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }
    bool
    test_and_set(memory_order __m = memory_order_seq_cst);

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile;

    void
    clear(memory_order __m = memory_order_seq_cst);

    void
    clear(memory_order __m = memory_order_seq_cst) volatile;
  };
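
  // A minimal usage sketch (illustrative, not part of this header):
  // atomic_flag is the building block for spinlocks, which is exactly
  // how this never-lock-free implementation uses it internally.
  //
  //   atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void
  //   __with_lock()
  //   {
  //     while (__lock.test_and_set(memory_order_acquire))
  //       { }                               // spin until previously clear
  //     // ... critical section ...
  //     __lock.clear(memory_order_release); // publish and release
  //   }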
  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8
  // bytes, since that is what GCC built-in functions for atomic memory
  // access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __int_type;

      __int_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i): _M_i (__i) { }
      operator __int_type() const
      { return load(); }

      operator __int_type() const volatile
      { return load(); }

      __int_type
      operator=(__int_type __i)
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile
      {
	store(__i);
	return __i;
      }
      __int_type
      operator++(int)
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile
      { return fetch_add(1); }

      __int_type
      operator--(int)
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __int_type
      operator++()
      { return fetch_add(1) + 1; }

      __int_type
      operator++() volatile
      { return fetch_add(1) + 1; }

      __int_type
      operator--()
      { return fetch_sub(1) - 1; }

      __int_type
      operator--() volatile
      { return fetch_sub(1) - 1; }

      __int_type
      operator+=(__int_type __i)
      { return fetch_add(__i) + __i; }

      __int_type
      operator+=(__int_type __i) volatile
      { return fetch_add(__i) + __i; }

      __int_type
      operator-=(__int_type __i)
      { return fetch_sub(__i) - __i; }

      __int_type
      operator-=(__int_type __i) volatile
      { return fetch_sub(__i) - __i; }

      __int_type
      operator&=(__int_type __i)
      { return fetch_and(__i) & __i; }

      __int_type
      operator&=(__int_type __i) volatile
      { return fetch_and(__i) & __i; }

      __int_type
      operator|=(__int_type __i)
      { return fetch_or(__i) | __i; }

      __int_type
      operator|=(__int_type __i) volatile
      { return fetch_or(__i) | __i; }

      __int_type
      operator^=(__int_type __i)
      { return fetch_xor(__i) ^ __i; }

      __int_type
      operator^=(__int_type __i) volatile
      { return fetch_xor(__i) ^ __i; }
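
      // Note the pattern above: each compound-assignment operator
      // recomputes the new value from the old value that fetch_*
      // returns, so the updated value is produced without a second
      // atomic read.  For example:
      //
      //   __atomic_base<int> __a(5);
      //   int __n = (__a += 3);  // fetch_add(3) returns 5; __n == 8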
      bool
      is_lock_free() const
      { return false; }

      bool
      is_lock_free() const volatile
      { return false; }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
	__glibcxx_assert(__m != memory_order_acquire);
	__glibcxx_assert(__m != memory_order_acq_rel);
	__glibcxx_assert(__m != memory_order_consume);
	_ATOMIC_STORE_(this, __i, __m);
      }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      {
	__glibcxx_assert(__m != memory_order_acquire);
	__glibcxx_assert(__m != memory_order_acq_rel);
	__glibcxx_assert(__m != memory_order_consume);
	_ATOMIC_STORE_(this, __i, __m);
      }
      __int_type
      load(memory_order __m = memory_order_seq_cst) const
      {
	__glibcxx_assert(__m != memory_order_release);
	__glibcxx_assert(__m != memory_order_acq_rel);
	return _ATOMIC_LOAD_(this, __m);
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
	__glibcxx_assert(__m != memory_order_release);
	__glibcxx_assert(__m != memory_order_acq_rel);
	return _ATOMIC_LOAD_(this, __m);
      }
      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }

      __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }
      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2)
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) volatile
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst)
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) volatile
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2)
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) volatile
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst)
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) volatile
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __calculate_memory_order(__m));
      }
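
      // Typical caller-side idiom (an illustrative sketch): because
      // compare_exchange_weak may fail spuriously, CAS-based updates
      // are written as retry loops.  On failure, the first argument is
      // refreshed with the value actually found, so the loop needs no
      // separate reload.
      //
      //   __atomic_base<int> __a(1);
      //   int __old = __a.load();
      //   while (!__a.compare_exchange_weak(__old, __old * 2))
      //     { }  // __old now holds the current value; retry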
      __int_type
      fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __int_type
      fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __int_type
      fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __int_type
      fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }

      __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
    };
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __return_pointer_type;
      typedef void* __pointer_type;

      __pointer_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_i.
      constexpr __atomic_base(__return_pointer_type __p): _M_i (__p) { }
      operator __return_pointer_type() const
      { return reinterpret_cast<__return_pointer_type>(load()); }

      operator __return_pointer_type() const volatile
      { return reinterpret_cast<__return_pointer_type>(load()); }

      __return_pointer_type
      operator=(__pointer_type __p)
      {
	store(__p);
	return reinterpret_cast<__return_pointer_type>(__p);
      }

      __return_pointer_type
      operator=(__pointer_type __p) volatile
      {
	store(__p);
	return reinterpret_cast<__return_pointer_type>(__p);
      }
      __return_pointer_type
      operator++(int)
      { return reinterpret_cast<__return_pointer_type>(fetch_add(1)); }

      __return_pointer_type
      operator++(int) volatile
      { return reinterpret_cast<__return_pointer_type>(fetch_add(1)); }

      __return_pointer_type
      operator--(int)
      { return reinterpret_cast<__return_pointer_type>(fetch_sub(1)); }

      __return_pointer_type
      operator--(int) volatile
      { return reinterpret_cast<__return_pointer_type>(fetch_sub(1)); }

      __return_pointer_type
      operator++()
      { return reinterpret_cast<__return_pointer_type>(fetch_add(1) + 1); }

      __return_pointer_type
      operator++() volatile
      { return reinterpret_cast<__return_pointer_type>(fetch_add(1) + 1); }

      __return_pointer_type
      operator--()
      { return reinterpret_cast<__return_pointer_type>(fetch_sub(1) - 1); }

      __return_pointer_type
      operator--() volatile
      { return reinterpret_cast<__return_pointer_type>(fetch_sub(1) - 1); }

      __return_pointer_type
      operator+=(ptrdiff_t __d)
      { return reinterpret_cast<__return_pointer_type>(fetch_add(__d) + __d); }

      __return_pointer_type
      operator+=(ptrdiff_t __d) volatile
      { return reinterpret_cast<__return_pointer_type>(fetch_add(__d) + __d); }

      __return_pointer_type
      operator-=(ptrdiff_t __d)
      { return reinterpret_cast<__return_pointer_type>(fetch_sub(__d) - __d); }

      __return_pointer_type
      operator-=(ptrdiff_t __d) volatile
      { return reinterpret_cast<__return_pointer_type>(fetch_sub(__d) - __d); }
      bool
      is_lock_free() const
      { return false; }

      bool
      is_lock_free() const volatile
      { return false; }

      void
      store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
      {
	__glibcxx_assert(__m != memory_order_acquire);
	__glibcxx_assert(__m != memory_order_acq_rel);
	__glibcxx_assert(__m != memory_order_consume);
	_ATOMIC_STORE_(this, __p, __m);
      }

      void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile
      {
	__glibcxx_assert(__m != memory_order_acquire);
	__glibcxx_assert(__m != memory_order_acq_rel);
	__glibcxx_assert(__m != memory_order_consume);
	// Inlined _ATOMIC_STORE_, spelled out because &_M_i is
	// volatile-qualified here.
	volatile __pointer_type* __p2 = &_M_i;
	__typeof__(__p) __w = (__p);
	__atomic_flag_base* __g = __atomic_flag_for_address(__p2);
	__atomic_flag_wait_explicit(__g, __m);
	*__p2 = reinterpret_cast<__pointer_type>(__w);
	atomic_flag_clear_explicit(__g, __m);
      }
      __return_pointer_type
      load(memory_order __m = memory_order_seq_cst) const
      {
	__glibcxx_assert(__m != memory_order_release);
	__glibcxx_assert(__m != memory_order_acq_rel);
	void* __v = _ATOMIC_LOAD_(this, __m);
	return reinterpret_cast<__return_pointer_type>(__v);
      }

      __return_pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
	__glibcxx_assert(__m != memory_order_release);
	__glibcxx_assert(__m != memory_order_acq_rel);
	void* __v = _ATOMIC_LOAD_(this, __m);
	return reinterpret_cast<__return_pointer_type>(__v);
      }
      __return_pointer_type
      exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
      {
	void* __v = _ATOMIC_MODIFY_(this, =, __p, __m);
	return reinterpret_cast<__return_pointer_type>(__v);
      }

      __return_pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile
      {
	volatile __pointer_type* __p2 = &_M_i;
	__typeof__(__p) __w = (__p);
	__atomic_flag_base* __g = __atomic_flag_for_address(__p2);
	__atomic_flag_wait_explicit(__g, __m);
	__pointer_type __r = *__p2;
	*__p2 = reinterpret_cast<__pointer_type>(__w);
	atomic_flag_clear_explicit(__g, __m);
	// Return the value read while the lock was held; re-reading _M_i
	// after the unlock would race and could yield the new value.
	return reinterpret_cast<__return_pointer_type>(__r);
      }
      bool
      compare_exchange_strong(__return_pointer_type& __rp1,
			      __pointer_type __p2,
			      memory_order __m1, memory_order __m2)
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	__pointer_type& __p1 = reinterpret_cast<void*&>(__rp1);
	return _ATOMIC_CMPEXCHNG_(this, &__p1, __p2, __m1);
      }

      bool
      compare_exchange_strong(__return_pointer_type& __rp1,
			      __pointer_type __p2,
			      memory_order __m1, memory_order __m2) volatile
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	__pointer_type& __p1 = reinterpret_cast<void*&>(__rp1);
	return _ATOMIC_CMPEXCHNG_(this, &__p1, __p2, __m1);
      }
      __return_pointer_type
      fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
      {
	void* __v = _ATOMIC_MODIFY_(this, +=, __d, __m);
	return reinterpret_cast<__return_pointer_type>(__v);
      }

      __return_pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile
      {
	void* __v = _ATOMIC_MODIFY_(this, +=, __d, __m);
	return reinterpret_cast<__return_pointer_type>(__v);
      }

      __return_pointer_type
      fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
      {
	void* __v = _ATOMIC_MODIFY_(this, -=, __d, __m);
	return reinterpret_cast<__return_pointer_type>(__v);
      }

      __return_pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile
      {
	void* __v = _ATOMIC_MODIFY_(this, -=, __d, __m);
	return reinterpret_cast<__return_pointer_type>(__v);
      }
    };
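
  // NB: _M_i is declared void*, so the += and -= expanded inside
  // _ATOMIC_MODIFY_ rely on the GCC extension that arithmetic on void*
  // works in bytes.  The fetch_add/fetch_sub offsets above are therefore
  // not scaled by sizeof(_PTp); for example (illustrative, assuming a
  // local int __buf[4]):
  //
  //   __atomic_base<int*> __a(__buf);
  //   __a.fetch_add(1);   // advances _M_i by one byte, not one int
  //
  // which differs from the element-scaled arithmetic the C++11 standard
  // specifies for atomic pointer fetch_add.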
#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
#undef _ATOMIC_CMPEXCHNG_
} // namespace __atomic0

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_0_H